* Extra ring macros to sync a consumer index up to the public producer index.
* Generally UNSAFE, but we use it for recovery and shutdown in some cases.
*/
-#define RING_DROP_PENDING_REQUESTS(_p, _r) \
+#define RING_DROP_PENDING_REQUESTS(_r) \
do { \
(_r)->req_cons = (_r)->sring->req_prod; \
} while (0)
-#define RING_DROP_PENDING_RESPONSES(_p, _r) \
+#define RING_DROP_PENDING_RESPONSES(_r) \
do { \
(_r)->rsp_cons = (_r)->sring->rsp_prod; \
} while (0)
static void __ctrl_if_tx_tasklet(unsigned long data)
{
ctrl_msg_t *msg;
- int was_full = RING_FULL(CTRL_RING, &ctrl_if_tx_ring);
+ int was_full = RING_FULL(&ctrl_if_tx_ring);
RING_IDX i, rp;
i = ctrl_if_tx_ring.rsp_cons;
for ( ; i != rp; i++ )
{
- msg = RING_GET_RESPONSE(CTRL_RING, &ctrl_if_tx_ring, i);
+ msg = RING_GET_RESPONSE(&ctrl_if_tx_ring, i);
DPRINTK("Rx-Rsp %u/%u :: %d/%d\n", i-1,
ctrl_if_tx_ring.sring->rsp_prod,
smp_mb();
ctrl_if_tx_ring.rsp_cons = i;
- if ( was_full && !RING_FULL(CTRL_RING, &ctrl_if_tx_ring) )
+ if ( was_full && !RING_FULL(&ctrl_if_tx_ring) )
{
wake_up(&ctrl_if_tx_wait);
run_task_queue(&ctrl_if_tx_tq);
for ( ; i != rp; i++)
{
- pmsg = RING_GET_REQUEST(CTRL_RING, &ctrl_if_rx_ring, i);
+ pmsg = RING_GET_REQUEST(&ctrl_if_rx_ring, i);
memcpy(&msg, pmsg, offsetof(ctrl_msg_t, msg));
DPRINTK("Rx-Req %u/%u :: %d/%d\n", i-1,
static irqreturn_t ctrl_if_interrupt(int irq, void *dev_id,
struct pt_regs *regs)
{
- if ( RING_HAS_UNCONSUMED_RESPONSES(CTRL_RING, &ctrl_if_tx_ring) )
+ if ( RING_HAS_UNCONSUMED_RESPONSES(&ctrl_if_tx_ring) )
tasklet_schedule(&ctrl_if_tx_tasklet);
- if ( RING_HAS_UNCONSUMED_REQUESTS(CTRL_RING, &ctrl_if_rx_ring) )
+ if ( RING_HAS_UNCONSUMED_REQUESTS(&ctrl_if_rx_ring) )
tasklet_schedule(&ctrl_if_rx_tasklet);
return IRQ_HANDLED;
spin_lock_irqsave(&ctrl_if_lock, flags);
- if ( RING_FULL(CTRL_RING, &ctrl_if_tx_ring) )
+ if ( RING_FULL(&ctrl_if_tx_ring) )
{
spin_unlock_irqrestore(&ctrl_if_lock, flags);
return -EAGAIN;
ctrl_if_tx_ring.rsp_cons,
msg->type, msg->subtype);
- dmsg = RING_GET_REQUEST(CTRL_RING, &ctrl_if_tx_ring,
+ dmsg = RING_GET_REQUEST(&ctrl_if_tx_ring,
ctrl_if_tx_ring.req_prod_pvt);
memcpy(dmsg, msg, sizeof(*msg));
ctrl_if_tx_ring.req_prod_pvt++;
- RING_PUSH_REQUESTS(CTRL_RING, &ctrl_if_tx_ring);
+ RING_PUSH_REQUESTS(&ctrl_if_tx_ring);
spin_unlock_irqrestore(&ctrl_if_lock, flags);
struct tq_struct *task)
{
/* Fast path. */
- if ( !RING_FULL(CTRL_RING, &ctrl_if_tx_ring) )
+ if ( !RING_FULL(&ctrl_if_tx_ring) )
return 0;
(void)queue_task(task, &ctrl_if_tx_tq);
* certainly return 'not full'.
*/
smp_mb();
- return RING_FULL(CTRL_RING, &ctrl_if_tx_ring);
+ return RING_FULL(&ctrl_if_tx_ring);
}
void
ctrl_if_rx_ring.rsp_prod_pvt,
msg->type, msg->subtype);
- dmsg = RING_GET_RESPONSE(CTRL_RING, &ctrl_if_rx_ring,
+ dmsg = RING_GET_RESPONSE(&ctrl_if_rx_ring,
ctrl_if_rx_ring.rsp_prod_pvt);
if ( dmsg != msg )
memcpy(dmsg, msg, sizeof(*msg));
ctrl_if_rx_ring.rsp_prod_pvt++;
- RING_PUSH_RESPONSES(CTRL_RING, &ctrl_if_rx_ring);
+ RING_PUSH_RESPONSES(&ctrl_if_rx_ring);
spin_unlock_irqrestore(&ctrl_if_lock, flags);
}
/* Sync up with shared indexes. */
- RING_DROP_PENDING_RESPONSES(CTRL_RING, &ctrl_if_tx_ring);
- RING_DROP_PENDING_REQUESTS(CTRL_RING, &ctrl_if_rx_ring);
+ RING_DROP_PENDING_RESPONSES(&ctrl_if_tx_ring);
+ RING_DROP_PENDING_REQUESTS(&ctrl_if_rx_ring);
ctrl_if_evtchn = xen_start_info.domain_controller_evtchn;
ctrl_if_irq = bind_evtchn_to_irq(ctrl_if_evtchn);
for ( i = 0; i < 256; i++ )
ctrl_if_rxmsg_handler[i] = ctrl_if_rxmsg_default_handler;
- FRONT_RING_ATTACH(CTRL_RING, &ctrl_if_tx_ring, &ctrl_if->tx_ring);
- BACK_RING_ATTACH(CTRL_RING, &ctrl_if_rx_ring, &ctrl_if->rx_ring);
+ FRONT_RING_ATTACH(&ctrl_if_tx_ring, &ctrl_if->tx_ring);
+ BACK_RING_ATTACH(&ctrl_if_rx_ring, &ctrl_if->rx_ring);
spin_lock_init(&ctrl_if_lock);
void ctrl_if_discard_responses(void)
{
- RING_DROP_PENDING_RESPONSES(CTRL_RING, &ctrl_if_tx_ring);
+ RING_DROP_PENDING_RESPONSES(&ctrl_if_tx_ring);
}
EXPORT_SYMBOL(ctrl_if_send_message_noblock);
rmb(); /* Ensure we see queued requests up to 'rp'. */
for ( i = blk_ring->req_cons;
- (i != rp) && !RING_REQUEST_CONS_OVERFLOW(BLKIF_RING, blk_ring, i);
+ (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
i++ )
{
if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
break;
}
- req = RING_GET_REQUEST(BLKIF_RING, blk_ring, i);
+ req = RING_GET_REQUEST(blk_ring, i);
switch ( req->operation )
{
case BLKIF_OP_READ:
/* Place on the response ring for the relevant domain. */
spin_lock_irqsave(&blkif->blk_ring_lock, flags);
- resp = RING_GET_RESPONSE(BLKIF_RING, blk_ring, blk_ring->rsp_prod_pvt);
+ resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
resp->id = id;
resp->operation = op;
resp->status = st;
wmb(); /* Ensure other side can see the response fields. */
blk_ring->rsp_prod_pvt++;
- RING_PUSH_RESPONSES(BLKIF_RING, blk_ring);
+ RING_PUSH_RESPONSES(blk_ring);
spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
/* Kick the relevant domain. */
return;
}
sring = (blkif_sring_t *)vma->addr;
- SHARED_RING_INIT(BLKIF_RING, sring);
- BACK_RING_INIT(BLKIF_RING, &blkif->blk_ring, sring);
+ SHARED_RING_INIT(sring);
+ BACK_RING_INIT(&blkif->blk_ring, sring);
blkif->evtchn = evtchn;
blkif->irq = bind_evtchn_to_irq(evtchn);
static blkif_front_ring_t blk_ring;
unsigned long rec_ring_free;
-blkif_request_t rec_ring[RING_SIZE(BLKIF_RING, &blk_ring)];
+blkif_request_t rec_ring[RING_SIZE(&blk_ring)];
static int recovery = 0; /* "Recovery in progress" flag. Protected
* by the blkif_io_lock */
{
unsigned long free = rec_ring_free;
- if ( free > RING_SIZE(BLKIF_RING, &blk_ring) )
+ if ( free > RING_SIZE(&blk_ring) )
BUG();
rec_ring_free = rec_ring[free].id;
static inline void flush_requests(void)
{
DISABLE_SCATTERGATHER();
- RING_PUSH_REQUESTS(BLKIF_RING, &blk_ring);
+ RING_PUSH_REQUESTS(&blk_ring);
notify_via_evtchn(blkif_evtchn);
}
return 1;
/* Fill out a communications ring structure. */
- ring_req = RING_GET_REQUEST(BLKIF_RING, &blk_ring, blk_ring.req_prod_pvt);
+ ring_req = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt);
id = GET_ID_FROM_FREELIST();
rec_ring[id].id = (unsigned long) req;
continue;
}
- if ( RING_FULL(BLKIF_RING, &blk_ring) )
+ if ( RING_FULL(&blk_ring) )
{
blk_stop_queue(rq);
break;
{
unsigned long id;
- bret = RING_GET_RESPONSE(BLKIF_RING, &blk_ring, i);
+ bret = RING_GET_RESPONSE(&blk_ring, i);
id = bret->id;
req = (struct request *)rec_ring[id].id;
blkif_completion( &rec_ring[id] );
{
/* We kick pending request queues if the ring is reasonably empty. */
if ( (nr_pending != 0) &&
- (RING_PENDING_REQUESTS(BLKIF_RING, &blk_ring) <
- (RING_SIZE(BLKIF_RING, &blk_ring) >> 1)) )
+ (RING_PENDING_REQUESTS(&blk_ring) <
+ (RING_SIZE(&blk_ring) >> 1)) )
{
/* Attempt to drain the queue, but bail if the ring becomes full. */
- while ( (nr_pending != 0) && !RING_FULL(BLKIF_RING, &blk_ring) )
+ while ( (nr_pending != 0) && !RING_FULL(&blk_ring) )
do_blkif_request(pending_queues[--nr_pending]);
}
}
(sg_dev == device) &&
(sg_next_sect == sector_number) )
{
- req = RING_GET_REQUEST(BLKIF_RING, &blk_ring,
+ req = RING_GET_REQUEST(&blk_ring,
blk_ring.req_prod_pvt - 1);
bh = (struct buffer_head *)id;
return 0;
}
- else if ( RING_FULL(BLKIF_RING, &blk_ring) )
+ else if ( RING_FULL(&blk_ring) )
{
return 1;
}
}
/* Fill out a communications ring structure. */
- req = RING_GET_REQUEST(BLKIF_RING, &blk_ring, blk_ring.req_prod_pvt);
+ req = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt);
xid = GET_ID_FROM_FREELIST();
rec_ring[xid].id = id;
unsigned long id;
blkif_response_t *bret;
- bret = RING_GET_RESPONSE(BLKIF_RING, &blk_ring, i);
+ bret = RING_GET_RESPONSE(&blk_ring, i);
id = bret->id;
bh = (struct buffer_head *)rec_ring[id].id;
blkif_request_t *req_d;
retry:
- while ( RING_FULL(BLKIF_RING, &blk_ring) )
+ while ( RING_FULL(&blk_ring) )
{
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);
}
spin_lock_irqsave(&blkif_io_lock, flags);
- if ( RING_FULL(BLKIF_RING, &blk_ring) )
+ if ( RING_FULL(&blk_ring) )
{
spin_unlock_irqrestore(&blkif_io_lock, flags);
goto retry;
}
DISABLE_SCATTERGATHER();
- req_d = RING_GET_REQUEST(BLKIF_RING, &blk_ring, blk_ring.req_prod_pvt);
+ req_d = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt);
*req_d = *req;
id = GET_ID_FROM_FREELIST();
free_page((unsigned long)blk_ring.sring);
sring = (blkif_sring_t *)__get_free_page(GFP_KERNEL);
- SHARED_RING_INIT(BLKIF_RING, sring);
- FRONT_RING_INIT(BLKIF_RING, &blk_ring, sring);
+ SHARED_RING_INIT(sring);
+ FRONT_RING_INIT(&blk_ring, sring);
blkif_state = BLKIF_STATE_DISCONNECTED;
blkif_send_interface_connect();
}
* This will need to be fixed once we have barriers */
/* Stage 1 : Find active and move to safety. */
- for ( i = 0; i < RING_SIZE(BLKIF_RING, &blk_ring); i++ )
+ for ( i = 0; i < RING_SIZE(&blk_ring); i++ )
{
if ( rec_ring[i].id >= PAGE_OFFSET )
{
- req = RING_GET_REQUEST(BLKIF_RING, &blk_ring,
+ req = RING_GET_REQUEST(&blk_ring,
blk_ring.req_prod_pvt);
translate_req_to_mfn(req, &rec_ring[i]);
blk_ring.req_prod_pvt++;
/* Stage 2 : Set up shadow list. */
for ( i = 0; i < blk_ring.req_prod_pvt; i++ )
{
- req = RING_GET_REQUEST(BLKIF_RING, &blk_ring, i);
+ req = RING_GET_REQUEST(&blk_ring, i);
rec_ring[i].id = req->id;
req->id = i;
translate_req_to_pfn(&rec_ring[i], req);
}
/* Stage 3 : Set up free list. */
- for ( ; i < RING_SIZE(BLKIF_RING, &blk_ring); i++ )
+ for ( ; i < RING_SIZE(&blk_ring); i++ )
rec_ring[i].id = i+1;
rec_ring_free = blk_ring.req_prod_pvt;
- rec_ring[RING_SIZE(BLKIF_RING, &blk_ring)-1].id = 0x0fffffff;
+ rec_ring[RING_SIZE(&blk_ring)-1].id = 0x0fffffff;
/* blk_ring->req_prod will be set when we flush_requests().*/
wmb();
printk(KERN_INFO "xen_blk: Initialising virtual block device driver\n");
rec_ring_free = 0;
- for ( i = 0; i < RING_SIZE(BLKIF_RING, &blk_ring); i++ )
+ for ( i = 0; i < RING_SIZE(&blk_ring); i++ )
rec_ring[i].id = i+1;
- rec_ring[RING_SIZE(BLKIF_RING, &blk_ring)-1].id = 0x0fffffff;
+ rec_ring[RING_SIZE(&blk_ring)-1].id = 0x0fffffff;
(void)ctrl_if_register_receiver(CMSG_BLKIF_FE, blkif_ctrlif_rx,
CALLBACK_IN_BLOCKING_CONTEXT);
}
sring = (blkif_sring_t *)vma->addr;
- SHARED_RING_INIT(BLKIF_RING, sring);
- BACK_RING_INIT(BLKIF_RING, &blkif->blk_ring, sring);
+ SHARED_RING_INIT(sring);
+ BACK_RING_INIT(&blkif->blk_ring, sring);
blkif->evtchn = evtchn;
blkif->irq = bind_evtchn_to_irq(evtchn);
blkif_sring_t *sring;
sring = (blkif_sring_t *)__get_free_page(GFP_KERNEL);
- SHARED_RING_INIT(BLKIF_RING, sring);
- FRONT_RING_INIT(BLKIF_RING, &blktap_be_ring, sring);
+ SHARED_RING_INIT(sring);
+ FRONT_RING_INIT(&blktap_be_ring, sring);
blktap_be_state = BLKIF_STATE_DISCONNECTED;
DPRINTK("Blkif-Passthrough-BE is now DISCONNECTED.\n");
blkif_ptbe_send_interface_connect();
ar = &active_reqs[ID_TO_IDX(rsp->id)];
rsp->id = ar->id;
- resp_d = RING_GET_RESPONSE(BLKIF_RING, &blkif->blk_ring,
+ resp_d = RING_GET_RESPONSE(&blkif->blk_ring,
blkif->blk_ring.rsp_prod_pvt);
memcpy(resp_d, rsp, sizeof(blkif_response_t));
wmb();
return 0;
}
- req_d = RING_GET_REQUEST(BLKIF_RING, &blktap_be_ring,
+ req_d = RING_GET_REQUEST(&blktap_be_ring,
blktap_be_ring.req_prod_pvt);
memcpy(req_d, req, sizeof(blkif_request_t));
wmb();
inline void kick_fe_domain(blkif_t *blkif)
{
- RING_PUSH_RESPONSES(BLKIF_RING, &blkif->blk_ring);
+ RING_PUSH_RESPONSES(&blkif->blk_ring);
notify_via_evtchn(blkif->evtchn);
DPRINTK("notified FE(dom %u)\n", blkif->domid);
return;
wmb(); /* Ensure that the frontend can see the requests. */
- RING_PUSH_REQUESTS(BLKIF_RING, &blktap_be_ring);
+ RING_PUSH_REQUESTS(&blktap_be_ring);
notify_via_evtchn(blktap_be_evtchn);
DPRINTK("notified BE\n");
}
for ( i = blkif->blk_ring.req_cons;
(i != rp) &&
- !RING_REQUEST_CONS_OVERFLOW(BLKIF_RING, &blkif->blk_ring, i);
+ !RING_REQUEST_CONS_OVERFLOW(&blkif->blk_ring, i);
i++ )
{
break;
}
- req_s = RING_GET_REQUEST(BLKIF_RING, &blkif->blk_ring, i);
+ req_s = RING_GET_REQUEST(&blkif->blk_ring, i);
/* This is a new request:
* Assign an active request record, and remap the id.
*/
/* copy the request message to the BERing */
DPRINTK("blktap: FERing[%u] -> BERing[%u]\n",
- (unsigned)__SHARED_RING_MASK(BLKIF_RING,
- blktap_be_ring.sring, i),
- (unsigned)__SHARED_RING_MASK(BLKIF_RING,
- blktap_be_ring.sring, blktap_be_ring.req_prod_pvt));
+                 (unsigned)(i & (RING_SIZE(&blktap_be_ring)-1)),
+                 (unsigned)(blktap_be_ring.req_prod_pvt &
+                 (RING_SIZE(&blktap_be_ring)-1)));
write_req_to_be_ring(req_s);
notify_be = 1;
for ( i = blktap_be_ring.rsp_cons; i != rp; i++)
{
- resp_s = RING_GET_RESPONSE(BLKIF_RING, &blktap_be_ring, i);
+ resp_s = RING_GET_RESPONSE(&blktap_be_ring, i);
/* BE -> FE interposition point is here. */
/* Copy the response message to FERing */
DPRINTK("blktap: BERing[%u] -> FERing[%u]\n",
- (unsigned)__SHARED_RING_MASK(BLKIF_RING,
- blkif->blk_ring.sring, i),
- (unsigned)__SHARED_RING_MASK(BLKIF_RING,
- blkif->blk_ring.sring,
- blkif->blk_ring.rsp_prod_pvt));
+                 (unsigned)(i & (RING_SIZE(&blkif->blk_ring)-1)),
+                 (unsigned)(blkif->blk_ring.rsp_prod_pvt &
+                 (RING_SIZE(&blkif->blk_ring)-1)));
write_resp_to_fe_ring(blkif, resp_s);
kick_fe_domain(blkif);
SetPageReserved(virt_to_page(csring));
- SHARED_RING_INIT(CTRL_RING, csring);
- FRONT_RING_INIT(CTRL_RING, &blktap_uctrl_ring, csring);
+ SHARED_RING_INIT(csring);
+ FRONT_RING_INIT(&blktap_uctrl_ring, csring);
/* Allocate the fe ring. */
SetPageReserved(virt_to_page(sring));
- SHARED_RING_INIT(BLKIF_RING, sring);
- FRONT_RING_INIT(BLKIF_RING, &blktap_ufe_ring, sring);
+ SHARED_RING_INIT(sring);
+ FRONT_RING_INIT(&blktap_ufe_ring, sring);
/* Allocate the be ring. */
sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
SetPageReserved(virt_to_page(sring));
- SHARED_RING_INIT(BLKIF_RING, sring);
- BACK_RING_INIT(BLKIF_RING, &blktap_ube_ring, sring);
+ SHARED_RING_INIT(sring);
+ BACK_RING_INIT(&blktap_ube_ring, sring);
DPRINTK(KERN_ALERT "blktap open.\n");
{
poll_wait(file, &blktap_wait, wait);
- if ( RING_HAS_UNPUSHED_REQUESTS(BLKIF_RING, &blktap_uctrl_ring) ||
- RING_HAS_UNPUSHED_REQUESTS(BLKIF_RING, &blktap_ufe_ring) ||
- RING_HAS_UNPUSHED_RESPONSES(BLKIF_RING, &blktap_ube_ring) ) {
+ if ( RING_HAS_UNPUSHED_REQUESTS(&blktap_uctrl_ring) ||
+ RING_HAS_UNPUSHED_REQUESTS(&blktap_ufe_ring) ||
+ RING_HAS_UNPUSHED_RESPONSES(&blktap_ube_ring) ) {
- RING_PUSH_REQUESTS(BLKIF_RING, &blktap_uctrl_ring);
- RING_PUSH_REQUESTS(BLKIF_RING, &blktap_ufe_ring);
- RING_PUSH_RESPONSES(BLKIF_RING, &blktap_ube_ring);
+ RING_PUSH_REQUESTS(&blktap_uctrl_ring);
+ RING_PUSH_REQUESTS(&blktap_ufe_ring);
+ RING_PUSH_RESPONSES(&blktap_ube_ring);
return POLLIN | POLLRDNORM;
}
return 0;
}
- if ( RING_FULL(BLKIF_RING, &blktap_ufe_ring) ) {
+ if ( RING_FULL(&blktap_ufe_ring) ) {
DPRINTK("blktap: fe_ring is full, can't add.\n");
return 0;
}
- target = RING_GET_REQUEST(BLKIF_RING, &blktap_ufe_ring,
+ target = RING_GET_REQUEST(&blktap_ufe_ring,
blktap_ufe_ring.req_prod_pvt);
memcpy(target, req, sizeof(*req));
/* No test for fullness in the response direction. */
- target = RING_GET_RESPONSE(BLKIF_RING, &blktap_ube_ring,
+ target = RING_GET_RESPONSE(&blktap_ube_ring,
blktap_ube_ring.rsp_prod_pvt);
memcpy(target, rsp, sizeof(*rsp));
for ( i = blktap_ufe_ring.rsp_cons; i != rp; i++ )
{
- resp_s = RING_GET_RESPONSE(BLKIF_RING, &blktap_ufe_ring, i);
+ resp_s = RING_GET_RESPONSE(&blktap_ufe_ring, i);
DPRINTK("resp->fe_ring\n");
ar = lookup_active_req(ID_TO_IDX(resp_s->id));
rmb();
for ( i = blktap_ube_ring.req_cons; i != rp; i++ )
{
- req_s = RING_GET_REQUEST(BLKIF_RING, &blktap_ube_ring, i);
+ req_s = RING_GET_REQUEST(&blktap_ube_ring, i);
DPRINTK("req->be_ring\n");
write_req_to_be_ring(req_s);
/* No test for fullness in the response direction. */
- target = RING_GET_REQUEST(CTRL_RING, &blktap_uctrl_ring,
+ target = RING_GET_REQUEST(&blktap_uctrl_ring,
blktap_uctrl_ring.req_prod_pvt);
memcpy(target, msg, sizeof(*msg));
}
sring = (usbif_sring_t *)vma->addr;
- SHARED_RING_INIT(USBIF_RING, sring);
- BACK_RING_INIT(USBIF_RING, &up->usb_ring, sring);
+ SHARED_RING_INIT(sring);
+ BACK_RING_INIT(&up->usb_ring, sring);
up->evtchn = evtchn;
up->irq = bind_evtchn_to_irq(evtchn);
/* Take items off the comms ring, taking care not to overflow. */
for ( i = usb_ring->req_cons;
- (i != rp) && !RING_REQUEST_CONS_OVERFLOW(USBIF_RING, usb_ring, i);
+ (i != rp) && !RING_REQUEST_CONS_OVERFLOW(usb_ring, i);
i++ )
{
if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
break;
}
- req = RING_GET_REQUEST(USBIF_RING, usb_ring, i);
+ req = RING_GET_REQUEST(usb_ring, i);
switch ( req->operation )
{
/* Place on the response ring for the relevant domain. */
spin_lock_irqsave(&up->usb_ring_lock, flags);
- resp = RING_GET_RESPONSE(USBIF_RING, usb_ring, usb_ring->rsp_prod_pvt);
+ resp = RING_GET_RESPONSE(usb_ring, usb_ring->rsp_prod_pvt);
resp->id = id;
resp->operation = op;
resp->status = st;
dump_response(resp);
usb_ring->rsp_prod_pvt++;
- RING_PUSH_RESPONSES(USBIF_RING, usb_ring);
+ RING_PUSH_RESPONSES(usb_ring);
spin_unlock_irqrestore(&up->usb_ring_lock, flags);
/* Kick the relevant domain. */
#endif
- if ( RING_FULL(USBIF_RING, usb_ring) )
+ if ( RING_FULL(usb_ring) )
{
printk(KERN_WARNING
"xhci_queue_req(): USB ring full, not queuing request\n");
}
/* Stick something in the shared communications ring. */
- req = RING_GET_REQUEST(USBIF_RING, usb_ring, usb_ring->req_prod_pvt);
+ req = RING_GET_REQUEST(usb_ring, usb_ring->req_prod_pvt);
req->operation = USBIF_OP_IO;
req->port = 0; /* We don't care what the port is. */
memset(req->setup, 0, 8);
usb_ring->req_prod_pvt++;
- RING_PUSH_REQUESTS(USBIF_RING, usb_ring);
+ RING_PUSH_REQUESTS(usb_ring);
notify_via_evtchn(xhci->evtchn);
usbif->resp_prod, xhci->usb_resp_cons);
#endif
- if ( RING_FULL(USBIF_RING, usb_ring) )
+ if ( RING_FULL(usb_ring) )
{
printk(KERN_WARNING
"xhci_queue_probe(): ring full, not queuing request\n");
}
/* Stick something in the shared communications ring. */
- req = RING_GET_REQUEST(USBIF_RING, usb_ring, usb_ring->req_prod_pvt);
+ req = RING_GET_REQUEST(usb_ring, usb_ring->req_prod_pvt);
memset(req, sizeof(*req), 0);
req->port = port;
usb_ring->req_prod_pvt++;
- RING_PUSH_REQUESTS(USBIF_RING, usb_ring);
+ RING_PUSH_REQUESTS(usb_ring);
notify_via_evtchn(xhci->evtchn);
xhci->awaiting_reset = 1;
/* Stick something in the shared communications ring. */
- req = RING_GET_REQUEST(USBIF_RING, usb_ring, usb_ring->req_prod_pvt);
+ req = RING_GET_REQUEST(usb_ring, usb_ring->req_prod_pvt);
memset(req, sizeof(*req), 0);
req->port = port;
usb_ring->req_prod_pvt++;
- RING_PUSH_REQUESTS(USBIF_RING, usb_ring);
+ RING_PUSH_REQUESTS(usb_ring);
notify_via_evtchn(xhci->evtchn);
/* Take items off the comms ring, taking care not to overflow. */
for ( i = usb_ring->rsp_cons; i != rp; i++ )
{
- resp = RING_GET_RESPONSE(USBIF_RING, usb_ring, i);
+ resp = RING_GET_RESPONSE(usb_ring, i);
/* May need to deal with batching and with putting a ceiling on
the number dispatched for performance and anti-dos reasons */
/* Move from CLOSED to DISCONNECTED state. */
sring = (usbif_sring_t *)__get_free_page(GFP_KERNEL);
- SHARED_RING_INIT(USBIF_RING, sring);
- FRONT_RING_INIT(USBIF_RING, &xhci->usb_ring, sring);
+ SHARED_RING_INIT(sring);
+ FRONT_RING_INIT(&xhci->usb_ring, sring);
xhci->state = USBIF_STATE_DISCONNECTED;
/* Construct an interface-CONNECT message for the domain controller. */
#include <string.h>
#include <unistd.h>
-
#define __COMPILING_BLKTAP_LIB
#include "blktaplib.h"
#define BLKTAP_IOCTL_KICK 1
-// this is in the header now
-//DEFINE_RING_TYPES(blkif, blkif_request_t, blkif_response_t);
-
void got_sig_bus();
void got_sig_int();
-
/* in kernel these are opposite, but we are a consumer now. */
blkif_back_ring_t fe_ring; /* slightly counterintuitive ;) */
blkif_front_ring_t be_ring;
ctrl_back_ring_t ctrl_ring;
-
-
unsigned long mmap_vstart = 0;
char *blktap_mem;
int fd = 0;
#define BLKTAP_MMAP_PAGES \
((BLKIF_MAX_SEGMENTS_PER_REQUEST + 1) * BLKIF_RING_SIZE)
#define BLKTAP_MMAP_REGION_SIZE (BLKTAP_RING_PAGES + BLKTAP_MMAP_PAGES)
-
-
int bad_count = 0;
void bad(void)
blkif_request_t *req_d;
//req_d = FRONT_RING_NEXT_EMPTY_REQUEST(&be_ring);
- req_d = RING_GET_REQUEST(BLKIF_RING, &be_ring, be_ring.req_prod_pvt);
+ req_d = RING_GET_REQUEST(&be_ring, be_ring.req_prod_pvt);
memcpy(req_d, req, sizeof(blkif_request_t));
wmb();
be_ring.req_prod_pvt++;
blkif_response_t *rsp_d;
//rsp_d = BACK_RING_NEXT_EMPTY_RESPONSE(&fe_ring);
- rsp_d = RING_GET_RESPONSE(BLKIF_RING, &fe_ring, fe_ring.rsp_prod_pvt);
+ rsp_d = RING_GET_RESPONSE(&fe_ring, fe_ring.rsp_prod_pvt);
memcpy(rsp_d, rsp, sizeof(blkif_response_t));
wmb();
fe_ring.rsp_prod_pvt++;
{
apply_rsp_hooks(rsp);
write_rsp_to_fe_ring(rsp);
- RING_PUSH_RESPONSES(BLKIF_RING, &fe_ring);
+ RING_PUSH_RESPONSES(&fe_ring);
ioctl(fd, BLKTAP_IOCTL_KICK_FE);
}
/* assign the rings to the mapped memory */
csring = (ctrl_sring_t *)blktap_mem;
- BACK_RING_INIT(CTRL_RING, &ctrl_ring, csring);
+ BACK_RING_INIT(&ctrl_ring, csring);
sring = (blkif_sring_t *)((unsigned long)blktap_mem + PAGE_SIZE);
- FRONT_RING_INIT(BLKIF_RING, &be_ring, sring);
+ FRONT_RING_INIT(&be_ring, sring);
sring = (blkif_sring_t *)((unsigned long)blktap_mem + (2 *PAGE_SIZE));
- BACK_RING_INIT(BLKIF_RING, &fe_ring, sring);
+ BACK_RING_INIT(&fe_ring, sring);
mmap_vstart = (unsigned long)blktap_mem + (BLKTAP_RING_PAGES << PAGE_SHIFT);
rmb();
for (i = ctrl_ring.req_cons; i < rp; i++)
{
- msg = RING_GET_REQUEST(CTRL_RING, &ctrl_ring, i);
+ msg = RING_GET_REQUEST(&ctrl_ring, i);
ctrl_hook = ctrl_hook_chain;
while (ctrl_hook != NULL)
}
/* Using this as a unidirectional ring. */
ctrl_ring.req_cons = ctrl_ring.rsp_prod_pvt = i;
- RING_PUSH_RESPONSES(CTRL_RING, &ctrl_ring);
+ RING_PUSH_RESPONSES(&ctrl_ring);
/* empty the fe_ring */
notify_fe = 0;
- notify_be = RING_HAS_UNCONSUMED_REQUESTS(BLKIF_RING, &fe_ring);
+ notify_be = RING_HAS_UNCONSUMED_REQUESTS(&fe_ring);
rp = fe_ring.sring->req_prod;
rmb();
for (i = fe_ring.req_cons; i != rp; i++)
{
int done = 0; /* stop forwarding this request */
- req = RING_GET_REQUEST(BLKIF_RING, &fe_ring, i);
+ req = RING_GET_REQUEST(&fe_ring, i);
DPRINTF("copying an fe request\n");
fe_ring.req_cons = i;
/* empty the be_ring */
- notify_fe |= RING_HAS_UNCONSUMED_RESPONSES(BLKIF_RING, &be_ring);
+ notify_fe |= RING_HAS_UNCONSUMED_RESPONSES(&be_ring);
rp = be_ring.sring->rsp_prod;
rmb();
for (i = be_ring.rsp_cons; i != rp; i++)
{
- rsp = RING_GET_RESPONSE(BLKIF_RING, &be_ring, i);
+ rsp = RING_GET_RESPONSE(&be_ring, i);
DPRINTF("copying a be request\n");
if (notify_be) {
DPRINTF("notifying be\n");
- RING_PUSH_REQUESTS(BLKIF_RING, &be_ring);
+ RING_PUSH_REQUESTS(&be_ring);
ioctl(fd, BLKTAP_IOCTL_KICK_BE);
}
if (notify_fe) {
DPRINTF("notifying fe\n");
- RING_PUSH_RESPONSES(BLKIF_RING, &fe_ring);
+ RING_PUSH_RESPONSES(&fe_ring);
ioctl(fd, BLKTAP_IOCTL_KICK_FE);
}
}
control_msg_t *smsg;
RING_IDX c = cc->tx_ring.req_cons;
- if ( !RING_HAS_UNCONSUMED_REQUESTS(CTRL_RING, &cc->tx_ring) )
+ if ( !RING_HAS_UNCONSUMED_REQUESTS(&cc->tx_ring) )
{
DPRINTF("no request to read\n");
return -1;
}
rmb(); /* make sure we see the data associated with the request */
- smsg = RING_GET_REQUEST(CTRL_RING, &cc->tx_ring, c);
+ smsg = RING_GET_REQUEST(&cc->tx_ring, c);
memcpy(&dmsg->msg, smsg, sizeof(*smsg));
if ( dmsg->msg.length > sizeof(dmsg->msg.msg) )
dmsg->msg.length = sizeof(dmsg->msg.msg);
control_msg_t *dmsg;
RING_IDX p = cc->rx_ring.req_prod_pvt;
- if ( RING_FULL(CTRL_RING, &cc->rx_ring) )
+ if ( RING_FULL(&cc->rx_ring) )
{
DPRINTF("no space to write request");
return -ENOSPC;
}
- dmsg = RING_GET_REQUEST(CTRL_RING, &cc->rx_ring, p);
+ dmsg = RING_GET_REQUEST(&cc->rx_ring, p);
memcpy(dmsg, &smsg->msg, sizeof(*dmsg));
wmb();
cc->rx_ring.req_prod_pvt++;
- RING_PUSH_REQUESTS(CTRL_RING, &cc->rx_ring);
+ RING_PUSH_REQUESTS(&cc->rx_ring);
return 0;
}
control_msg_t *smsg;
RING_IDX c = cc->rx_ring.rsp_cons;
- if ( !RING_HAS_UNCONSUMED_RESPONSES(CTRL_RING, &cc->rx_ring) )
+ if ( !RING_HAS_UNCONSUMED_RESPONSES(&cc->rx_ring) )
{
DPRINTF("no response to read");
return -1;
}
rmb(); /* make sure we see the data associated with the request */
- smsg = RING_GET_RESPONSE(CTRL_RING, &cc->rx_ring, c);
+ smsg = RING_GET_RESPONSE(&cc->rx_ring, c);
memcpy(&dmsg->msg, smsg, sizeof(*smsg));
if ( dmsg->msg.length > sizeof(dmsg->msg.msg) )
dmsg->msg.length = sizeof(dmsg->msg.msg);
return -ENOSPC;
}
- dmsg = RING_GET_RESPONSE(CTRL_RING, &cc->tx_ring, p);
+ dmsg = RING_GET_RESPONSE(&cc->tx_ring, p);
memcpy(dmsg, &smsg->msg, sizeof(*dmsg));
wmb();
cc->tx_ring.rsp_prod_pvt++;
- RING_PUSH_RESPONSES(CTRL_RING, &cc->tx_ring);
+ RING_PUSH_RESPONSES(&cc->tx_ring);
return 0;
}
int ctrl_chan_request_to_read(control_channel_t *cc)
{
- return (RING_HAS_UNCONSUMED_REQUESTS(CTRL_RING, &cc->tx_ring));
+ return (RING_HAS_UNCONSUMED_REQUESTS(&cc->tx_ring));
}
int ctrl_chan_space_to_write_request(control_channel_t *cc)
{
- return (!(RING_FULL(CTRL_RING, &cc->rx_ring)));
+ return (!(RING_FULL(&cc->rx_ring)));
}
int ctrl_chan_response_to_read(control_channel_t *cc)
{
- return (RING_HAS_UNCONSUMED_RESPONSES(CTRL_RING, &cc->rx_ring));
+ return (RING_HAS_UNCONSUMED_RESPONSES(&cc->rx_ring));
}
int ctrl_chan_space_to_write_response(control_channel_t *cc)
}
/* Synchronise ring indexes. */
- BACK_RING_ATTACH(CTRL_RING, &cc->tx_ring, &cc->interface->tx_ring);
- FRONT_RING_ATTACH(CTRL_RING, &cc->rx_ring, &cc->interface->rx_ring);
+ BACK_RING_ATTACH(&cc->tx_ring, &cc->interface->tx_ring);
+ FRONT_RING_ATTACH(&cc->rx_ring, &cc->interface->rx_ring);
cc->connected = 1;
* Generate blkif ring structures and types.
*/
-#define BLKIF_RING RING_PARAMS(blkif_request_t, blkif_response_t, PAGE_SIZE)
-DEFINE_RING_TYPES(blkif, BLKIF_RING);
+DEFINE_RING_TYPES(blkif, blkif_request_t, blkif_response_t, PAGE_SIZE);
/*
* BLKIF_OP_PROBE:
* CONTROL_RING_MEM is currently an 8-slot ring of ctrl_msg_t structs and
* two 32-bit counters: (64 * 8) + (2 * 4) = 520
*/
-#define CONTROL_RING_MEM 520
-#define CTRL_RING RING_PARAMS(control_msg_t, control_msg_t, CONTROL_RING_MEM)
-DEFINE_RING_TYPES(ctrl, CTRL_RING);
+#define CONTROL_RING_MEM 520
+DEFINE_RING_TYPES(ctrl, control_msg_t, control_msg_t, CONTROL_RING_MEM);
typedef struct {
ctrl_sring_t tx_ring; /* 0: guest -> controller */
typedef unsigned int RING_IDX;
-/* This is horrible: it rounds a 32-bit unsigned constant down to the
- * nearest power of two, by finding the highest set bit. */
-#define __RD2PO2(_x) (((_x) & 0x80000000) ? 0x80000000 : \
- ((_x) & 0x40000000) ? 0x40000000 : \
- ((_x) & 0x20000000) ? 0x20000000 : \
- ((_x) & 0x10000000) ? 0x10000000 : \
- ((_x) & 0x08000000) ? 0x08000000 : \
- ((_x) & 0x04000000) ? 0x04000000 : \
- ((_x) & 0x02000000) ? 0x02000000 : \
- ((_x) & 0x01000000) ? 0x01000000 : \
- ((_x) & 0x00800000) ? 0x00800000 : \
- ((_x) & 0x00400000) ? 0x00400000 : \
- ((_x) & 0x00200000) ? 0x00200000 : \
- ((_x) & 0x00100000) ? 0x00100000 : \
- ((_x) & 0x00080000) ? 0x00080000 : \
- ((_x) & 0x00040000) ? 0x00040000 : \
- ((_x) & 0x00020000) ? 0x00020000 : \
- ((_x) & 0x00010000) ? 0x00010000 : \
- ((_x) & 0x00008000) ? 0x00008000 : \
- ((_x) & 0x00004000) ? 0x00004000 : \
- ((_x) & 0x00002000) ? 0x00002000 : \
- ((_x) & 0x00001000) ? 0x00001000 : \
- ((_x) & 0x00000800) ? 0x00000800 : \
- ((_x) & 0x00000400) ? 0x00000400 : \
- ((_x) & 0x00000200) ? 0x00000200 : \
- ((_x) & 0x00000100) ? 0x00000100 : \
- ((_x) & 0x00000080) ? 0x00000080 : \
- ((_x) & 0x00000040) ? 0x00000040 : \
- ((_x) & 0x00000020) ? 0x00000020 : \
- ((_x) & 0x00000010) ? 0x00000010 : \
- ((_x) & 0x00000008) ? 0x00000008 : \
- ((_x) & 0x00000004) ? 0x00000004 : \
- ((_x) & 0x00000002) ? 0x00000002 : \
- ((_x) & 0x00000001) ? 0x00000001 : 0x00000000)
-
-/* Given a shared ring, tell me how many entries there are in it. The
- * rule is: a ring contains as many entries as will fit, rounded down to
- * the nearest power of two (so we can mask with (size-1) to loop
- * around) */
-#define __SRING_SIZE(__params, __esize) \
- __RD2PO2((sizeof((__params)->size) - (2 * sizeof(RING_IDX))) / (__esize))
-#define SRING_SIZE(__params, __sringp) \
- __SRING_SIZE(__params, sizeof (__sringp)->ring[0])
+/* Round a 32-bit unsigned constant down to the nearest power of two. */
+#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
+#define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
+#define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
+#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
+#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
+
+/*
+ * Calculate size of a shared ring, given the total available space for the
+ * ring and indexes (_sz), and the name tag of the request/response structure.
+ * S ring contains as many entries as will fit, rounded down to the nearest
+ * power of two (so we can mask with (size-1) to loop around).
+ */
+#define __RING_SIZE(_name, _sz) \
+ (__RD32(((_sz) - 2*sizeof(RING_IDX)) / sizeof(union _name##_sring_entry)))
/*
* Macros to make the correct C datatypes for a new kind of ring.
*
* In a header where you want the ring datatype declared, you then do:
*
- * #define MY_RING RING_PARAMS(request_t, response_t, PAGE_SIZE)
- * DEFINE_RING_TYPES(mytag, MY_RING);
+ * DEFINE_RING_TYPES(mytag, request_t, response_t, PAGE_SIZE);
*
* These expand out to give you a set of types, as you can see below.
* The most important of these are:
* mytag_front_ring_t - The 'front' half of the ring.
* mytag_back_ring_t - The 'back' half of the ring.
*
- * Use the RING_PARAMS define (MY_RING above) as a first parameter on all
- * the ring functions. To initialize a ring in your code, on the front
- * half, you do a:
+ * To initialize a ring in your code, on the front half, you do:
*
* mytag_front_ring_t front_ring;
*
- * SHARED_RING_INIT(MY_RING, (mytag_sring_t *)shared_page)
- * FRONT_RING_INIT(MY_RING, &front_ring, (mytag_sring_t *)shared_page)
+ * SHARED_RING_INIT((mytag_sring_t *)shared_page);
+ * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page);
*
* Initializing the back follows similarly...
*/
-/* NB: RING SIZING. (a note to ease future debugging...)
- *
- * Passing size information into the ring macros is made difficult by
- * the lack of a reasonable constant declaration in C. To get around this,
- * the RING_PARAMS define places the requested size of the ring as the
- * static size of the 'size' array in the anonymous RING_PARAMS struct.
- * While this struct is never actually instantiated, __SRING_SIZE is
- * able to use sizeof() to get at the constant size.
- */
-
-#define RING_PARAMS(__req_t, __rsp_t, __size) \
-((struct { \
- char size[__size]; \
- __req_t req; \
- __rsp_t rsp; \
- \
-} *) 0)
-
-
-#define DEFINE_RING_TYPES(__name, __params) \
+#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t, __size) \
\
/* Shared ring entry */ \
union __name##_sring_entry { \
- typeof ((__params)->req) req; \
- typeof ((__params)->rsp) rsp; \
+ __req_t req; \
+ __rsp_t rsp; \
} PACKED; \
\
/* Shared ring page */ \
struct __name##_sring { \
RING_IDX req_prod; \
RING_IDX rsp_prod; \
- union __name##_sring_entry \
- ring[__SRING_SIZE(__params, sizeof (union __name##_sring_entry))]; \
+ union __name##_sring_entry ring[__RING_SIZE(__name, __size)]; \
} PACKED; \
\
/* "Front" end's private variables */ \
*
* N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
* This is OK in 1-for-1 request-response situations where the
- * requestor (front end) never has more than SRING_SIZE()-1
+ * requestor (front end) never has more than RING_SIZE()-1
* outstanding requests.
*/
/* Initialising empty rings */
-#define SHARED_RING_INIT(_p, _s) do { \
+#define SHARED_RING_INIT(_s) do { \
(_s)->req_prod = 0; \
(_s)->rsp_prod = 0; \
} while(0)
-#define FRONT_RING_INIT(_p, _r, _s) do { \
+#define FRONT_RING_INIT(_r, _s) do { \
(_r)->req_prod_pvt = 0; \
(_r)->rsp_cons = 0; \
(_r)->sring = (_s); \
} while (0)
-#define BACK_RING_INIT(_p, _r, _s) do { \
+#define BACK_RING_INIT(_r, _s) do { \
(_r)->rsp_prod_pvt = 0; \
(_r)->req_cons = 0; \
(_r)->sring = (_s); \
} while (0)
/* Initialize to existing shared indexes -- for recovery */
-#define FRONT_RING_ATTACH(_p, _r, _s) do { \
+#define FRONT_RING_ATTACH(_r, _s) do { \
(_r)->sring = (_s); \
(_r)->req_prod_pvt = (_s)->req_prod; \
(_r)->rsp_cons = (_s)->rsp_prod; \
} while (0)
-#define BACK_RING_ATTACH(_p, _r, _s) do { \
+#define BACK_RING_ATTACH(_r, _s) do { \
(_r)->sring = (_s); \
(_r)->rsp_prod_pvt = (_s)->rsp_prod; \
(_r)->req_cons = (_s)->req_prod; \
} while (0)
-
-/* How to mask off a number for use as an offset into a ring
- * N.B. This evalutes its second argument once but its first often */
-#define __SHARED_RING_MASK(_p, _s, _i) \
- ((_i) & (SRING_SIZE((_p), (_s)) - 1))
-
/* How big is this ring? */
-#define RING_SIZE(_p, _r) SRING_SIZE((_p), (_r)->sring)
+#define RING_SIZE(_r) \
+ (sizeof((_r)->sring->ring)/sizeof((_r)->sring->ring[0]))
/* How many empty slots are on a ring? */
-#define RING_PENDING_REQUESTS(_p, _r) \
+#define RING_PENDING_REQUESTS(_r) \
( ((_r)->req_prod_pvt - (_r)->rsp_cons) )
/* Test if there is an empty slot available on the front ring.
* (This is only meaningful from the front. )
*/
-#define RING_FULL(_p, _r) \
- (((_r)->req_prod_pvt - (_r)->rsp_cons) == SRING_SIZE((_p), (_r)->sring))
+#define RING_FULL(_r) \
+ (((_r)->req_prod_pvt - (_r)->rsp_cons) == RING_SIZE(_r))
/* Test if there are outstanding messages to be processed on a ring. */
-#define RING_HAS_UNCONSUMED_RESPONSES(_p, _r) \
+#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
( (_r)->rsp_cons != (_r)->sring->rsp_prod )
-#define RING_HAS_UNCONSUMED_REQUESTS(_p, _r) \
+#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
( ((_r)->req_cons != (_r)->sring->req_prod ) && \
(((_r)->req_cons - (_r)->rsp_prod_pvt) != \
- SRING_SIZE((_p), (_r)->sring)) )
+ RING_SIZE(_r)) )
/* Test if there are messages waiting to be pushed. */
-#define RING_HAS_UNPUSHED_REQUESTS(_p, _r) \
+#define RING_HAS_UNPUSHED_REQUESTS(_r) \
( (_r)->req_prod_pvt != (_r)->sring->req_prod )
-#define RING_HAS_UNPUSHED_RESPONSES(_p, _r) \
+#define RING_HAS_UNPUSHED_RESPONSES(_r) \
( (_r)->rsp_prod_pvt != (_r)->sring->rsp_prod )
-
/* Copy the private producer pointer into the shared ring so the other end
* can see the updates we've made. */
-#define RING_PUSH_REQUESTS(_p, _r) do { \
+#define RING_PUSH_REQUESTS(_r) do { \
wmb(); \
(_r)->sring->req_prod = (_r)->req_prod_pvt; \
} while (0)
-#define RING_PUSH_RESPONSES(_p, _r) do { \
+#define RING_PUSH_RESPONSES(_r) do { \
wmb(); \
(_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
} while (0)
-/* Direct access to individual ring elements, by index.
- */
-#define RING_GET_REQUEST(_p, _r, _idx) \
+/* Direct access to individual ring elements, by index. */
+#define RING_GET_REQUEST(_r, _idx) \
(&((_r)->sring->ring[ \
- __SHARED_RING_MASK((_p), (_r)->sring, (_idx)) \
+ ((_idx) & (RING_SIZE(_r) - 1)) \
].req))
-#define RING_GET_RESPONSE(_p, _r, _idx) \
+#define RING_GET_RESPONSE(_r, _idx) \
(&((_r)->sring->ring[ \
- __SHARED_RING_MASK((_p), (_r)->sring, (_idx)) \
+ ((_idx) & (RING_SIZE(_r) - 1)) \
].rsp))
-/* Loop termination condition: Would the specified index overflow the
- * ring?
- */
-#define RING_REQUEST_CONS_OVERFLOW(_p, _r, _cons) \
- (((_cons) - (_r)->rsp_prod_pvt) >= SRING_SIZE((_p), (_r)->sring))
+/* Loop termination condition: Would the specified index overflow the ring? */
+#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
+ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
#endif /* __XEN_PUBLIC_IO_RING_H__ */
#define USBIF_RSP_ERROR -1 /* non-specific 'error' */
#define USBIF_RSP_OKAY 0 /* non-specific 'okay' */
-#define USBIF_RING RING_PARAMS(usbif_request_t, usbif_response_t, PAGE_SIZE)
-DEFINE_RING_TYPES(usbif, USBIF_RING);
+DEFINE_RING_TYPES(usbif, usbif_request_t, usbif_response_t, PAGE_SIZE);
typedef struct {
unsigned long length; /* IN = expected, OUT = actual */